x86: Revert Cset 20334:dcc5d5d954e9
authorKeir Fraser <keir.fraser@citrix.com>
Tue, 26 Jan 2010 15:52:30 +0000 (15:52 +0000)
committerKeir Fraser <keir.fraser@citrix.com>
Tue, 26 Jan 2010 15:52:30 +0000 (15:52 +0000)
Recording the old MSI info doesn't solve all the corner cases
that arise when a guest's IRQ migration occurs.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>

xen/arch/x86/hvm/vmsi.c
xen/drivers/passthrough/io.c
xen/include/xen/hvm/irq.h

index aaf0d152e9434b2f45915b9dc1937842b79f9e6f..d9ec9fb0a4b0e06bdb5bc78727c85f078f7103fc 100644 (file)
@@ -92,11 +92,8 @@ int vmsi_deliver(struct domain *d, int pirq)
     case dest_LowestPrio:
     {
         target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
-        if ( target != NULL ) {
+        if ( target != NULL )
             vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-        }
         else
             HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                         "vector=%x delivery_mode=%x\n",
@@ -109,12 +106,9 @@ int vmsi_deliver(struct domain *d, int pirq)
     {
         for_each_vcpu ( d, v )
             if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
-                                   0, dest, dest_mode) ) {
+                                   0, dest, dest_mode) )
                 vmsi_inj_irq(d, vcpu_vlapic(v),
                              vector, trig_mode, delivery_mode);
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-            }
         break;
     }
 
index c0d271e81d46597d08638a4322165a7759d9f7a0..87e5a98ddcb686f4327a73219066fc859ae5ceea 100644 (file)
@@ -164,9 +164,7 @@ int pt_irq_create_bind_vtd(
         {
             hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
                                              HVM_IRQ_DPCI_GUEST_MSI;
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = pt_irq_bind->u.msi.gvec;
             hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = pt_irq_bind->u.msi.gflags;
             hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
             /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
             rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
@@ -180,8 +178,6 @@ int pt_irq_create_bind_vtd(
             {
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
                 hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = 0;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = 0;
                 hvm_irq_dpci->mirq[pirq].flags = 0;
                 clear_bit(pirq, hvm_irq_dpci->mapping);
                 spin_unlock(&d->event_lock);
@@ -200,11 +196,8 @@ int pt_irq_create_bind_vtd(
 
             /* if pirq is already mapped as vmsi, update the guest data/addr */
             if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
-                hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gflags =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+                    hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
+
                 hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
             }
@@ -435,21 +428,14 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector)
           pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
     {
         if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
-                (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector &&
-                 hvm_irq_dpci->mirq[pirq].gmsi.old_gvec != vector) )
+                (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
             continue;
 
-        if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec == vector ) {
-            dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
-            dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
-        } else {
-            dest = hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & VMSI_DEST_ID_MASK;
-            dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & VMSI_DM_MASK);
-        }
+        dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+        dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
         if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) )
             break;
     }
-
     if ( pirq < d->nr_pirqs )
         __msi_pirq_eoi(d, pirq);
     spin_unlock(&d->event_lock);
index 9e2eedbf6a7f10b99b71d0b3246ae94bdf47c676..c1747ed73c6172891a5090c274a7b0b755e7be06 100644 (file)
@@ -58,10 +58,8 @@ struct dev_intx_gsi_link {
 #define GLFAGS_SHIFT_TRG_MODE       15
 
 struct hvm_gmsi_info {
-    uint16_t gvec;
-    uint16_t old_gvec;
+    uint32_t gvec;
     uint32_t gflags;
-    uint32_t old_gflags;
     int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
 };